Make map_domain_page_global fail
authorJeremy Fitzhardinge <jeremy@xensource.com>
Thu, 24 May 2007 09:45:03 +0000 (10:45 +0100)
committerJeremy Fitzhardinge <jeremy@xensource.com>
Thu, 24 May 2007 09:45:03 +0000 (10:45 +0100)
When the global mapping cache runs out, make map_domain_page_global
return NULL on failure rather than fire an assertion failure.  This
also updates the callers to handle the error gracefully.

The only exception to this is the shadow pagetable code, which uses
map_domain_page_global to create a mapping for
v->arch.paging.shadow.guest_vtable; it's not clear this needs to be a
global mapping anyway.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
xen/arch/x86/hvm/vlapic.c
xen/arch/x86/mm/shadow/multi.c
xen/arch/x86/x86_32/domain_page.c

index 8e1dbd311d57b32ac3bb52b0c62e6c2aae030784..e9d6330f31ee52106d326253ac072452a3920c2a 100644 (file)
@@ -918,12 +918,19 @@ int vlapic_init(struct vcpu *v)
     vlapic->regs_page = alloc_domheap_page(NULL);
     if ( vlapic->regs_page == NULL )
     {
-        dprintk(XENLOG_ERR, "malloc vlapic regs error for vcpu %x\n",
+        dprintk(XENLOG_ERR, "malloc vlapic regs_page error for vcpu %x\n",
                 v->vcpu_id);
         return -ENOMEM;
     }
 
     vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
+    if ( vlapic->regs == NULL )
+    {
+        dprintk(XENLOG_ERR, "malloc vlapic regs error for vcpu %x\n",
+                v->vcpu_id);
+        return -ENOMEM;
+    }
+
     memset(vlapic->regs, 0, PAGE_SIZE);
 
     vlapic_reset(vlapic);
index f55382ac292bb88e4628074078127ce7680513ec..57e25c829fb178a9ebe79754dd22a563dbd5d79f 100644 (file)
@@ -3485,6 +3485,8 @@ sh_update_cr3(struct vcpu *v, int do_locking)
         if ( v->arch.paging.shadow.guest_vtable )
             sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
         v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+        /* PAGING_LEVELS==4 implies 64-bit, which means that
+         * map_domain_page_global can't fail */
     }
     else
         v->arch.paging.shadow.guest_vtable = __linear_l4_table;
@@ -3515,6 +3517,9 @@ sh_update_cr3(struct vcpu *v, int do_locking)
         if ( v->arch.paging.shadow.guest_vtable )
             sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
         v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+        /* Does this really need map_domain_page_global?  Handle the
+         * error properly if so. */
+        ASSERT( v->arch.paging.shadow.guest_vtable );
     }
     else
         v->arch.paging.shadow.guest_vtable = __linear_l2_table;
index 551ce5059309d3dcb11a23f49caf2e5335854765..59c129ee1339f78a027378995eab9608af0bb89f 100644 (file)
@@ -218,17 +218,25 @@ void *map_domain_page_global(unsigned long mfn)
 
         idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
         va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
-        ASSERT(va < FIXADDR_START);
+        if ( va >= FIXADDR_START )
+        {
+            va = 0;
+            goto fail;
+        }
     }
 
     set_bit(idx, inuse);
     inuse_cursor = idx + 1;
 
+  fail:
     spin_unlock(&globalmap_lock);
 
-    pl2e = virt_to_xen_l2e(va);
-    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
-    l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+    if ( likely(va != 0) )
+    {
+        pl2e = virt_to_xen_l2e(va);
+        pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
+        l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+    }
 
     return (void *)va;
 }